header%20ipynb.png

Package version :
  1. Tensorflow 2.4.1
  2. Keras 2.4.3
  3. Matplotlib 3.5.0
In [1]:
## Just disables the warning, doesn't take advantage of AVX/FMA to run faster
# import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
In [4]:
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1, 28,28,1)
X_test = X_test.reshape(-1, 28,28,1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 10s 1us/step
In [5]:
#model1 menggunakan Multilayer Neural Network

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense

model1 = Sequential()
model1.add(Flatten())
model1.add(Dense(64,activation='relu'))
model1.add(Dense(10,activation='softmax'))
In [6]:
model1.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])
history1 = model1.fit(X_train,y_train,epochs=10,batch_size=100,validation_data=(X_test,y_test))
Epoch 1/10
600/600 [==============================] - 3s 3ms/step - loss: 0.3867 - acc: 0.8935 - val_loss: 0.2165 - val_acc: 0.9371
Epoch 2/10
600/600 [==============================] - 1s 2ms/step - loss: 0.1891 - acc: 0.9461 - val_loss: 0.1668 - val_acc: 0.9527
Epoch 3/10
600/600 [==============================] - 1s 2ms/step - loss: 0.1448 - acc: 0.9583 - val_loss: 0.1397 - val_acc: 0.9577
Epoch 4/10
600/600 [==============================] - 1s 2ms/step - loss: 0.1170 - acc: 0.9657 - val_loss: 0.1176 - val_acc: 0.9661
Epoch 5/10
600/600 [==============================] - 1s 2ms/step - loss: 0.0988 - acc: 0.9711 - val_loss: 0.1069 - val_acc: 0.9690
Epoch 6/10
600/600 [==============================] - 1s 2ms/step - loss: 0.0841 - acc: 0.9751 - val_loss: 0.1019 - val_acc: 0.9700
Epoch 7/10
600/600 [==============================] - 1s 2ms/step - loss: 0.0728 - acc: 0.9787 - val_loss: 0.0953 - val_acc: 0.9729
Epoch 8/10
600/600 [==============================] - 1s 2ms/step - loss: 0.0646 - acc: 0.9805 - val_loss: 0.0930 - val_acc: 0.9723
Epoch 9/10
600/600 [==============================] - 1s 2ms/step - loss: 0.0568 - acc: 0.9837 - val_loss: 0.0888 - val_acc: 0.9741
Epoch 10/10
600/600 [==============================] - 1s 2ms/step - loss: 0.0513 - acc: 0.9850 - val_loss: 0.0899 - val_acc: 0.9747
In [7]:
model1.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten (Flatten)            (100, 784)                0         
_________________________________________________________________
dense (Dense)                (100, 64)                 50240     
_________________________________________________________________
dense_1 (Dense)              (100, 10)                 650       
=================================================================
Total params: 50,890
Trainable params: 50,890
Non-trainable params: 0
_________________________________________________________________
In [8]:
model1.evaluate(X_test,y_test)
313/313 [==============================] - 1s 4ms/step - loss: 0.0899 - acc: 0.9747
Out[8]:
[0.08989637345075607, 0.9746999740600586]
In [9]:
import matplotlib.pyplot as plt
import warnings 
warnings.filterwarnings('ignore')

epochs = range(10)

loss1 = history1.history['loss']
val_loss1 = history1.history['val_loss']

plt.plot(epochs,loss1,'r',label='training loss ANN')
plt.plot(epochs,val_loss1,'b',label='validasi loss ANN')
plt.legend()
Out[9]:
<matplotlib.legend.Legend at 0x160b27edb20>
In [10]:
import numpy as np

pred = model1.predict(X_test)
print('label actual:',np.argmax(y_test[30]))
print('label prediction:',np.argmax(pred[30]))
label actual: 3
label prediction: 3